In [1]:
cd ../..
/code
In [2]:
%run "source/config/notebook_settings.py"
import os
import numpy as np
import pandas as pd
import mlflow
from mlflow.tracking import MlflowClient
from helpsk.utility import read_pickle
import helpsk as hlp

from source.library.utilities import Timer, log_info, get_config

config = get_config()
mlflow_uri = config['MLFLOW']['URI']
log_info(f"MLFlow URI: {mlflow_uri}")

client = MlflowClient(tracking_uri=mlflow_uri)
2022-06-12 02:28:54 - INFO     | MLFlow URI: http://mlflow_server:1235
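
For reference, a sketch of the shape `get_config()` presumably returns (a configparser/dict-like mapping; only the URI value is confirmed by the log line above, and the other values are left elided because they are not shown in this notebook):

# Hypothetical shape of the object returned by get_config(); only the URI
# value is confirmed by the log output above.
config = {
    'MLFLOW': {
        'URI': 'http://mlflow_server:1235',
        'MODEL_NAME': '...',        # not shown in this notebook
        'EXPERIMENT_NAME': '...',   # not shown in this notebook
    },
}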

Get Latest Experiment Run from MLFlow

In [3]:
# Get the registered production model version and load the model artifact
production_model_info = client.get_latest_versions(
    name=config['MLFLOW']['MODEL_NAME'],
    stages=['Production']
)
assert len(production_model_info) == 1
production_model_info = production_model_info[0]
production_model = read_pickle(client.download_artifacts(
    run_id=production_model_info.run_id,
    path='model/model.pkl'
))
log_info(f"Production Model Version: {production_model_info.version}")
2022-06-12 02:28:55 - INFO     | Production Model Version: 1
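
The 'Production' stage queried above is assigned in the MLflow model registry. For context, a sketch of how a version gets promoted (this notebook only reads the stage; it does not perform this step):

# Promote a registered model version to 'Production' (shown for context only;
# not executed in this notebook).
client.transition_model_version_stage(
    name=config['MLFLOW']['MODEL_NAME'],
    version=production_model_info.version,
    stage='Production',
)
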
In [4]:
# get experiment and latest run info
credit_experiment = client.get_experiment_by_name(name=config['MLFLOW']['EXPERIMENT_NAME'])
runs = client.list_run_infos(experiment_id=credit_experiment.experiment_id)
latest_run = runs[np.argmax([x.start_time for x in runs])]
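
`list_run_infos` makes no ordering guarantee, hence the `np.argmax` over `start_time` above. An equivalent option is to let the tracking server sort, sketched here with `MlflowClient.search_runs`:

# Alternative: have MLflow return only the most recently started run.
latest_run = client.search_runs(
    experiment_ids=[credit_experiment.experiment_id],
    order_by=['attributes.start_time DESC'],
    max_results=1,
)[0].info
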
In [5]:
yaml_path = client.download_artifacts(run_id=latest_run.run_id, path='experiment.yaml')
results = hlp.sklearn_eval.MLExperimentResults.from_yaml_file(yaml_file_name=yaml_path)
In [6]:
# get the best estimator from the BayesSearchCV
best_estimator = read_pickle(client.download_artifacts(
    run_id=latest_run.run_id,
    path='model/model.pkl'
))
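
Note that `best_estimator` is not a bare sklearn object: the next cell reaches into `.model`, and `.predict` later returns continuous scores rather than hard labels. A rough sketch of the kind of wrapper this implies (an assumption about the pickled artifact, not helpsk's actual code):

from dataclasses import dataclass

@dataclass
class ScoreWrapperSketch:
    # Hypothetical stand-in for the pickled object: it holds the fitted sklearn
    # Pipeline and exposes predict() as a positive-class score.
    model: object

    def predict(self, X):
        return self.model.predict_proba(X)[:, 1]
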
In [7]:
best_estimator.model
Out[7]:
Pipeline(steps=[('prep',
                 ColumnTransformer(transformers=[('numeric',
                                                  Pipeline(steps=[('imputer',
                                                                   TransformerChooser(transformer=SimpleImputer())),
                                                                  ('scaler',
                                                                   TransformerChooser()),
                                                                  ('pca',
                                                                   TransformerChooser())]),
                                                  ['duration', 'credit_amount',
                                                   'installment_commitment',
                                                   'residence_since', 'age',
                                                   'existing_credits',
                                                   'num_dependents']),
                                                 ('non_numeric',
                                                  Pipeline(steps...,
                                                                   TransformerChooser(transformer=OneHotEncoder(handle_unknown='ignore')))]),
                                                  ['checking_status',
                                                   'credit_history', 'purpose',
                                                   'savings_status',
                                                   'employment',
                                                   'personal_status',
                                                   'other_parties',
                                                   'property_magnitude',
                                                   'other_payment_plans',
                                                   'housing', 'job',
                                                   'own_telephone',
                                                   'foreign_worker'])])),
                ('model',
                 RandomForestClassifier(n_estimators=500, random_state=42))])
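
`TransformerChooser` is a helpsk utility whose implementation is not shown here; judging from the repr above, it wraps an optional transformer so that BayesSearchCV can toggle or swap a preprocessing step. A sketch of that pattern (an assumption, not helpsk's actual code):

from sklearn.base import BaseEstimator, TransformerMixin

class OptionalTransformer(BaseEstimator, TransformerMixin):
    # Sketch of a 'chooser' step: delegate to `transformer` if one is set,
    # otherwise pass the data through unchanged.
    def __init__(self, transformer=None):
        self.transformer = transformer

    def fit(self, X, y=None):
        if self.transformer is not None:
            self.transformer.fit(X, y)
        return self

    def transform(self, X):
        return X if self.transformer is None else self.transformer.transform(X)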

Training & Test Data Info

In [8]:
client.download_artifacts(run_id=latest_run.run_id, path='x_train.pkl')
Out[8]:
'/code/mlflow-artifact-root/1/4747f97737b9481e9e95e1626a9b92b9/artifacts/x_train.pkl'
In [9]:
with Timer("Loading training/test datasets"):
    X_train = pd.read_pickle(client.download_artifacts(run_id=latest_run.run_id, path='x_train.pkl'))
    X_test = pd.read_pickle(client.download_artifacts(run_id=latest_run.run_id, path='x_test.pkl'))
    y_train = pd.read_pickle(client.download_artifacts(run_id=latest_run.run_id, path='y_train.pkl'))
    y_test = pd.read_pickle(client.download_artifacts(run_id=latest_run.run_id, path='y_test.pkl'))
2022-06-12 02:28:55 - INFO     | *****Timer Started: Loading training/test datasets
2022-06-12 02:28:55 - INFO     | *****Timer Finished (0.02 seconds)
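
`Timer` comes from source.library.utilities; a plausible minimal implementation, inferred only from the log lines it produces (an assumption, not the project's actual code):

import time
from contextlib import contextmanager

@contextmanager
def timer_sketch(message):
    # Log start, yield control, then log elapsed time, mirroring the
    # "*****Timer Started/Finished" lines above; log_info is the project's logger.
    log_info(f"*****Timer Started: {message}")
    start = time.perf_counter()
    yield
    log_info(f"*****Timer Finished ({time.perf_counter() - start:.2f} seconds)")
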
In [10]:
log_info(X_train.shape)
log_info(len(y_train))

log_info(X_test.shape)
log_info(len(y_test))
2022-06-12 02:28:55 - INFO     | (800, 20)
2022-06-12 02:28:55 - INFO     | 800
2022-06-12 02:28:55 - INFO     | (200, 20)
2022-06-12 02:28:55 - INFO     | 200
In [11]:
np.unique(y_train, return_counts=True)
Out[11]:
(array([0, 1]), array([559, 241]))
In [12]:
np.unique(y_train, return_counts=True)[1] / np.sum(np.unique(y_train, return_counts=True)[1])
Out[12]:
array([0.69875, 0.30125])
In [13]:
np.unique(y_test, return_counts=True)[1] / np.sum(np.unique(y_test, return_counts=True)[1])
Out[13]:
array([0.705, 0.295])
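
The same class-balance check reads more directly with pandas (equivalent to the `np.unique` arithmetic above):

# Equivalent, more idiomatic class-balance check.
pd.Series(y_train).value_counts(normalize=True)  # ~0.699 / 0.301
pd.Series(y_test).value_counts(normalize=True)   # ~0.705 / 0.295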

Cross-Validation Results

Best Scores/Params

In [14]:
log_info(f"Best Score: {results.best_score}")
2022-06-12 02:28:55 - INFO     | Best Score: 0.7566610156276207
In [15]:
log_info(f"Best Params: {results.best_params}")
2022-06-12 02:28:55 - INFO     | Best Params: {'model': 'RandomForestClassifier()', 'imputer': 'SimpleImputer()', 'scaler': 'None', 'pca': 'None', 'encoder': 'OneHotEncoder()'}
In [16]:
# Best model from each model-type.
df = results.to_formatted_dataframe(return_style=False, include_rank=True)
df["model_rank"] = df.groupby("model")["roc_auc Mean"].rank(method="first", ascending=False)
df.query('model_rank == 1')
Out[16]:
rank roc_auc Mean roc_auc 95CI.LO roc_auc 95CI.HI model C max_features max_depth n_estimators min_samples_split min_samples_leaf max_samples criterion learning_rate min_child_weight subsample colsample_bytree colsample_bylevel reg_alpha reg_lambda imputer scaler pca encoder model_rank
10 1 0.76 0.65 0.86 RandomForestClassifier() NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN SimpleImputer() None None OneHotEncoder() 1.00
4 4 0.75 0.69 0.80 LogisticRegression() 0.00 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN SimpleImputer(strategy='median') MinMaxScaler() None OneHotEncoder() 1.00
5 5 0.75 0.67 0.83 ExtraTreesClassifier() NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN SimpleImputer() None None OneHotEncoder() 1.00
19 15 0.72 0.63 0.82 XGBClassifier() NaN NaN 3.00 1482.00 NaN NaN NaN NaN 0.07 18.00 0.89 0.64 0.61 0.00 2.09 SimpleImputer() None None CustomOrdinalEncoder() 1.00
In [17]:
results.to_formatted_dataframe(return_style=True,
                               include_rank=True,
                               num_rows=500)
Out[17]:
rank roc_auc Mean roc_auc 95CI.LO roc_auc 95CI.HI model C max_features max_depth n_estimators min_samples_split min_samples_leaf max_samples criterion learning_rate min_child_weight subsample colsample_bytree colsample_bylevel reg_alpha reg_lambda imputer scaler pca encoder
1 0.757 0.650 0.864 RandomForestClassifier() <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer() None None OneHotEncoder()
2 0.754 0.635 0.873 RandomForestClassifier() <NA> 0.317 74.000 967.000 30.000 1.000 0.982 gini <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer(strategy='median') None PCA('mle') CustomOrdinalEncoder()
3 0.750 0.646 0.855 RandomForestClassifier() <NA> 0.239 41.000 1,886.000 3.000 15.000 0.864 gini <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer(strategy='most_frequent') None None OneHotEncoder()
4 0.749 0.695 0.804 LogisticRegression() 0.000 <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer(strategy='median') MinMaxScaler() None OneHotEncoder()
5 0.748 0.665 0.831 ExtraTreesClassifier() <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer() None None OneHotEncoder()
6 0.744 0.641 0.847 LogisticRegression() <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer() StandardScaler() None OneHotEncoder()
7 0.740 0.640 0.839 RandomForestClassifier() <NA> 0.445 87.000 1,244.000 33.000 27.000 0.795 gini <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer(strategy='most_frequent') None None OneHotEncoder()
8 0.738 0.657 0.820 RandomForestClassifier() <NA> 0.502 29.000 1,249.000 25.000 35.000 0.855 gini <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer(strategy='median') None None CustomOrdinalEncoder()
9 0.737 0.648 0.826 LogisticRegression() 58.381 <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer(strategy='median') StandardScaler() PCA('mle') OneHotEncoder()
10 0.728 0.659 0.798 LogisticRegression() 0.005 <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer(strategy='median') StandardScaler() PCA('mle') CustomOrdinalEncoder()
11 0.728 0.642 0.815 ExtraTreesClassifier() <NA> 0.331 21.000 626.000 18.000 39.000 0.861 entropy <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer(strategy='median') None None OneHotEncoder()
12 0.728 0.695 0.760 ExtraTreesClassifier() <NA> 0.135 15.000 1,987.000 10.000 39.000 0.708 gini <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer(strategy='median') None None CustomOrdinalEncoder()
13 0.726 0.694 0.758 ExtraTreesClassifier() <NA> 0.828 33.000 1,219.000 42.000 36.000 0.511 entropy <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer(strategy='most_frequent') None None CustomOrdinalEncoder()
14 0.725 0.635 0.815 ExtraTreesClassifier() <NA> 0.768 54.000 909.000 16.000 30.000 0.762 entropy <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer(strategy='most_frequent') None None OneHotEncoder()
15 0.725 0.627 0.823 XGBClassifier() <NA> <NA> 3.000 1,482.000 <NA> <NA> <NA> <NA> 0.067 18.000 0.889 0.636 0.615 0.000 2.093 SimpleImputer() None None CustomOrdinalEncoder()
16 0.712 0.589 0.835 XGBClassifier() <NA> <NA> 4.000 1,645.000 <NA> <NA> <NA> <NA> 0.260 4.000 0.941 0.869 0.584 0.011 1.117 SimpleImputer() None PCA('mle') OneHotEncoder()
17 0.702 0.659 0.745 LogisticRegression() 0.000 <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer(strategy='median') MinMaxScaler() None CustomOrdinalEncoder()
18 0.699 0.610 0.788 XGBClassifier() <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer() None None OneHotEncoder()
19 0.689 0.555 0.823 XGBClassifier() <NA> <NA> 4.000 1,961.000 <NA> <NA> <NA> <NA> 0.271 3.000 0.671 0.797 0.696 0.000 2.150 SimpleImputer(strategy='most_frequent') None PCA('mle') OneHotEncoder()
20 0.500 <NA> <NA> XGBClassifier() <NA> <NA> 8.000 1,155.000 <NA> <NA> <NA> <NA> 0.168 47.000 0.514 0.799 0.881 0.000 1.914 SimpleImputer() None None OneHotEncoder()
In [18]:
results.to_formatted_dataframe(query='model == "RandomForestClassifier()"', include_rank=True)
Out[18]:
rank roc_auc Mean roc_auc 95CI.LO roc_auc 95CI.HI max_features max_depth n_estimators min_samples_split min_samples_leaf max_samples criterion imputer pca encoder
1 0.757 0.650 0.864 <NA> <NA> <NA> <NA> <NA> <NA> <NA> SimpleImputer() None OneHotEncoder()
2 0.754 0.635 0.873 0.317 74.000 967.000 30.000 1.000 0.982 gini SimpleImputer(strategy='median') PCA('mle') CustomOrdinalEncoder()
3 0.750 0.646 0.855 0.239 41.000 1,886.000 3.000 15.000 0.864 gini SimpleImputer(strategy='most_frequent') None OneHotEncoder()
4 0.740 0.640 0.839 0.445 87.000 1,244.000 33.000 27.000 0.795 gini SimpleImputer(strategy='most_frequent') None OneHotEncoder()
5 0.738 0.657 0.820 0.502 29.000 1,249.000 25.000 35.000 0.855 gini SimpleImputer(strategy='median') None CustomOrdinalEncoder()
In [19]:
results.to_formatted_dataframe(query='model == "LogisticRegression()"', include_rank=True)
Out[19]:
rank roc_auc Mean roc_auc 95CI.LO roc_auc 95CI.HI C imputer scaler pca encoder
1 0.749 0.695 0.804 0.000 SimpleImputer(strategy='median') MinMaxScaler() None OneHotEncoder()
2 0.744 0.641 0.847 <NA> SimpleImputer() StandardScaler() None OneHotEncoder()
3 0.737 0.648 0.826 58.381 SimpleImputer(strategy='median') StandardScaler() PCA('mle') OneHotEncoder()
4 0.728 0.659 0.798 0.005 SimpleImputer(strategy='median') StandardScaler() PCA('mle') CustomOrdinalEncoder()
5 0.702 0.659 0.745 0.000 SimpleImputer(strategy='median') MinMaxScaler() None CustomOrdinalEncoder()

BayesSearchCV Performance Over Time

In [20]:
results.plot_performance_across_trials(facet_by='model').show()
In [21]:
results.plot_performance_across_trials(query='model == "RandomForestClassifier()"').show()

Variable Performance Over Time

In [22]:
results.plot_parameter_values_across_trials(query='model == "RandomForestClassifier()"').show()

Scatter Matrix

In [23]:
# results.plot_scatter_matrix(query='model == "RandomForestClassifier()"',
#                             height=1000, width=1000).show()

Variable Performance - Numeric

In [24]:
results.plot_performance_numeric_params(query='model == "RandomForestClassifier()"',
                                        height=800)
In [25]:
results.plot_parallel_coordinates(query='model == "RandomForestClassifier()"').show()

Variable Performance - Non-Numeric

In [26]:
results.plot_performance_non_numeric_params(query='model == "RandomForestClassifier()"').show()

In [27]:
results.plot_score_vs_parameter(
    query='model == "RandomForestClassifier()"',
    parameter='max_features',
    size='max_depth',
    color='encoder',
)

In [28]:
# results.plot_parameter_vs_parameter(
#     query='model == "XGBClassifier()"',
#     parameter_x='colsample_bytree',
#     parameter_y='learning_rate',
#     size='max_depth'
# )
In [29]:
# results.plot_parameter_vs_parameter(
#     query='model == "XGBClassifier()"',
#     parameter_x='colsample_bytree',
#     parameter_y='learning_rate',
#     size='imputer'
# )

Best Model - Test Set Performance

In [30]:
test_predictions = best_estimator.predict(X_test)
test_predictions[0:10]
Out[30]:
array([0.388, 0.506, 0.724, 0.368, 0.056, 0.472, 0.076, 0.47 , 0.18 ,
       0.23 ])
In [31]:
evaluator = hlp.sklearn_eval.TwoClassEvaluator(
    actual_values=y_test,
    predicted_scores=test_predictions,
    score_threshold=0.37
)
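
`score_threshold=0.37` tells the evaluator where to cut the continuous scores into class labels. Conceptually (a sketch of the thresholding, assuming a strict `>`; the evaluator's internals may differ):

# Scores above the threshold are treated as the positive class.
predicted_labels = (test_predictions > 0.37).astype(int)
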
In [32]:
evaluator.plot_actual_vs_predict_histogram()
In [33]:
evaluator.plot_confusion_matrix()
In [34]:
evaluator.all_metrics_df(return_style=True,
                         dummy_classifier_strategy=['prior', 'constant'],
                         round_by=3)
Out[34]:
  Score Dummy (prior) Dummy (constant) Explanation
AUC 0.823 0.500 0.500 Area under the ROC curve (true pos. rate vs false pos. rate); ranges from 0.5 (purely random classifier) to 1.0 (perfect classifier)
True Positive Rate 0.746 0.000 1.000 74.6% of positive instances were correctly identified; i.e. 44 "Positive Class" labels were correctly identified out of 59 instances; a.k.a. Sensitivity/Recall
True Negative Rate 0.801 1.000 0.000 80.1% of negative instances were correctly identified; i.e. 113 "Negative Class" labels were correctly identified out of 141 instances
False Positive Rate 0.199 0.000 1.000 19.9% of negative instances were incorrectly identified as positive; i.e. 28 "Negative Class" labels were incorrectly identified as "Positive Class", out of 141 instances
False Negative Rate 0.254 1.000 0.000 25.4% of positive instances were incorrectly identified as negative; i.e. 15 "Positive Class" labels were incorrectly identified as "Negative Class", out of 59 instances
Positive Predictive Value 0.611 0.000 0.295 When the model claims an instance is positive, it is correct 61.1% of the time; i.e. out of the 72 times the model predicted "Positive Class", it was correct 44 times; a.k.a. precision
Negative Predictive Value 0.883 0.705 0.000 When the model claims an instance is negative, it is correct 88.3% of the time; i.e. out of the 128 times the model predicted "Negative Class", it was correct 113 times
F1 Score 0.672 0.000 0.456 The F1 score can be interpreted as a weighted average of the precision and recall, where an F1 score reaches its best value at 1 and worst score at 0.
Precision/Recall AUC 0.662 0.295 0.295 Precision/Recall AUC is calculated with `average_precision`, which summarizes a precision-recall curve as the weighted mean of precisions achieved at each threshold. See scikit-learn documentation for caveats.
Accuracy 0.785 0.705 0.295 78.5% of instances were correctly identified
Error Rate 0.215 0.295 0.705 21.5% of instances were incorrectly identified
% Positive 0.295 0.295 0.295 29.5% of the data are positive; i.e. out of 200 total observations; 59 are labeled as "Positive Class"
Total Observations 200 200 200 There are 200 total observations; i.e. sample size
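
The headline numbers can be spot-checked directly with scikit-learn (same 0.37 threshold as above, again assuming a strict `>`; expected values are those from the table):

from sklearn.metrics import average_precision_score, f1_score, roc_auc_score

labels = (test_predictions > 0.37).astype(int)
roc_auc_score(y_test, test_predictions)            # ~0.823 (AUC row)
average_precision_score(y_test, test_predictions)  # ~0.662 (Precision/Recall AUC row)
f1_score(y_test, labels)                           # ~0.672 (F1 Score row)
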
In [35]:
evaluator.plot_roc_auc_curve().show()
In [36]:
evaluator.plot_precision_recall_auc_curve().show()
In [37]:
evaluator.plot_threshold_curves(score_threshold_range=(0.1, 0.7)).show()
In [38]:
evaluator.plot_precision_recall_tradeoff(score_threshold_range=(0.1, 0.6)).show()
In [39]:
evaluator.calculate_lift_gain(return_style=True)
Out[39]:
  Gain Lift
Percentile    
5 0.14 2.71
10 0.24 2.37
15 0.37 2.49
20 0.49 2.46
25 0.54 2.17
30 0.66 2.20
35 0.71 2.03
40 0.75 1.86
45 0.80 1.77
50 0.83 1.66
55 0.85 1.54
60 0.86 1.44
65 0.90 1.38
70 0.93 1.33
75 0.95 1.27
80 0.97 1.21
85 0.98 1.16
90 1.00 1.11
95 1.00 1.05
100 1.00 1.00
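
Gain at percentile p is the share of all positives captured in the top p% of observations when ranked by score; lift divides that share by p (random targeting has lift 1.0). For example, at the 5th percentile roughly 8 of the 59 positives sit among the top 10 scores, giving gain 0.14 and lift 2.71. A sketch of the computation (an assumption about how `calculate_lift_gain` defines its percentiles):

def lift_gain_sketch(y_true, scores, percentiles=range(5, 101, 5)):
    # Rank observations by descending score; gain = cumulative share of
    # positives captured; lift = gain relative to random targeting.
    order = np.argsort(-np.asarray(scores))
    y_sorted = np.asarray(y_true)[order]
    total_pos = y_sorted.sum()
    rows = []
    for p in percentiles:
        top_n = int(np.ceil(len(y_sorted) * p / 100))
        gain = y_sorted[:top_n].sum() / total_pos
        rows.append((p, gain, gain / (p / 100)))
    return pd.DataFrame(rows, columns=['Percentile', 'Gain', 'Lift'])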

Production Model - Test Set Performance

Note that the metrics below are identical to the best-estimator section above: the production model (version 1) appears to be the same fitted model as the latest run's best estimator (the predictions match exactly), so this section becomes informative once a newer candidate run exists.

In [40]:
test_predictions = production_model.predict(X_test)
test_predictions[0:10]
Out[40]:
array([0.388, 0.506, 0.724, 0.368, 0.056, 0.472, 0.076, 0.47 , 0.18 ,
       0.23 ])
In [41]:
evaluator = hlp.sklearn_eval.TwoClassEvaluator(
    actual_values=y_test,
    predicted_scores=test_predictions,
    score_threshold=0.37
)
In [42]:
evaluator.plot_actual_vs_predict_histogram()
In [43]:
evaluator.plot_confusion_matrix()
In [44]:
evaluator.all_metrics_df(return_style=True,
                         dummy_classifier_strategy=['prior', 'constant'],
                         round_by=3)
Out[44]:
  Score Dummy (prior) Dummy (constant) Explanation
AUC 0.823 0.500 0.500 Area under the ROC curve (true pos. rate vs false pos. rate); ranges from 0.5 (purely random classifier) to 1.0 (perfect classifier)
True Positive Rate 0.746 0.000 1.000 74.6% of positive instances were correctly identified; i.e. 44 "Positive Class" labels were correctly identified out of 59 instances; a.k.a. Sensitivity/Recall
True Negative Rate 0.801 1.000 0.000 80.1% of negative instances were correctly identified; i.e. 113 "Negative Class" labels were correctly identified out of 141 instances
False Positive Rate 0.199 0.000 1.000 19.9% of negative instances were incorrectly identified as positive; i.e. 28 "Negative Class" labels were incorrectly identified as "Positive Class", out of 141 instances
False Negative Rate 0.254 1.000 0.000 25.4% of positive instances were incorrectly identified as negative; i.e. 15 "Positive Class" labels were incorrectly identified as "Negative Class", out of 59 instances
Positive Predictive Value 0.611 0.000 0.295 When the model claims an instance is positive, it is correct 61.1% of the time; i.e. out of the 72 times the model predicted "Positive Class", it was correct 44 times; a.k.a. precision
Negative Predictive Value 0.883 0.705 0.000 When the model claims an instance is negative, it is correct 88.3% of the time; i.e. out of the 128 times the model predicted "Negative Class", it was correct 113 times
F1 Score 0.672 0.000 0.456 The F1 score can be interpreted as a weighted average of the precision and recall, where an F1 score reaches its best value at 1 and worst score at 0.
Precision/Recall AUC 0.662 0.295 0.295 Precision/Recall AUC is calculated with `average_precision`, which summarizes a precision-recall curve as the weighted mean of precisions achieved at each threshold. See scikit-learn documentation for caveats.
Accuracy 0.785 0.705 0.295 78.5% of instances were correctly identified
Error Rate 0.215 0.295 0.705 21.5% of instances were incorrectly identified
% Positive 0.295 0.295 0.295 29.5% of the data are positive; i.e. out of 200 total observations; 59 are labeled as "Positive Class"
Total Observations 200 200 200 There are 200 total observations; i.e. sample size
In [45]:
evaluator.plot_roc_auc_curve().show()
In [46]:
evaluator.plot_precision_recall_auc_curve().show()
In [47]:
evaluator.plot_threshold_curves(score_threshold_range=(0.1, 0.7)).show()
In [48]:
evaluator.plot_precision_recall_tradeoff(score_threshold_range=(0.1, 0.6)).show()
In [49]:
evaluator.calculate_lift_gain(return_style=True)
Out[49]:
  Gain Lift
Percentile    
5 0.14 2.71
10 0.24 2.37
15 0.37 2.49
20 0.49 2.46
25 0.54 2.17
30 0.66 2.20
35 0.71 2.03
40 0.75 1.86
45 0.80 1.77
50 0.83 1.66
55 0.85 1.54
60 0.86 1.44
65 0.90 1.38
70 0.93 1.33
75 0.95 1.27
80 0.97 1.21
85 0.98 1.16
90 1.00 1.11
95 1.00 1.05
100 1.00 1.00
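
Once a newer candidate is trained, a direct comparison of the candidate against the production model might look like this sketch:

from sklearn.metrics import roc_auc_score

# Compare the candidate and production models on the same held-out test set.
candidate_auc = roc_auc_score(y_test, best_estimator.predict(X_test))
production_auc = roc_auc_score(y_test, production_model.predict(X_test))
log_info(f"candidate AUC: {candidate_auc:.3f} | production AUC: {production_auc:.3f}")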